
@InProceedings{LayzaPedrTorr:2020:1tLaMa,
               author = "Layza, Jaime Rocca and Pedrini, Helio and Torres, Ricardo da 
                         Silva",
          affiliation = "Institute of Computing, University of Campinas, Campinas, SP, 
                         Brazil, 13083-852 and Institute of Computing, University of 
                         Campinas, Campinas, SP, Brazil, 13083-852 and Department of ICT 
                         and Natural Sciences, Norwegian University of Science and 
                         Technology (NTNU)",
                title = "1-to-N Large Margin Classifier",
            booktitle = "Proceedings...",
                 year = "2020",
               editor = "Musse, Soraia Raupp and Cesar Junior, Roberto Marcondes and 
                         Pelechano, Nuria and Wang, Zhangyang (Atlas)",
         organization = "33rd Conference on Graphics, Patterns and Images (SIBGRAPI)",
            publisher = "IEEE Computer Society",
              address = "Los Alamitos",
             keywords = "Large Margin Classifier, Noisy Label Data, Adversarial Attacks.",
             abstract = "Cross entropy with softmax is the standard loss function for 
                         classification in neural networks. However, this function can 
                         suffer from limited discriminative power, poor generalization, 
                         and a propensity to overfitting. To address these limitations, 
                         several approaches enforce a margin on top of the neural 
                         network, specifically at the softmax function. In this work, we 
                         present a novel formulation that aims to improve generalization 
                         and robustness to label noise not only by imposing a margin at 
                         the top of the neural network, but also by exploiting the 
                         entire structure of the mini-batch data. Based on the distance 
                         used by SVMs to obtain a maximal margin, we propose a broader 
                         distance definition, called the 1-to-N distance, and an 
                         approximate probability function as the basis for our proposed 
                         loss function. Empirical experiments on the MNIST, CIFAR-10, 
                         and ImageNet32 datasets demonstrate that our loss function has 
                         better generalization and label noise robustness than 
                         traditional cross entropy, showing improvements in three tasks: 
                         generalization, robustness to label noise, and robustness 
                         against adversarial attacks.",
  conference-location = "Porto de Galinhas (virtual)",
      conference-year = "7-10 Nov. 2020",
                  doi = "10.1109/SIBGRAPI51738.2020.00050",
             language = "en",
                  ibi = "8JMKD3MGPEW34M/43992TE",
                  url = "http://urlib.net/ibi/8JMKD3MGPEW34M/43992TE",
           targetfile = "PID6615191.pdf",
        urlaccessdate = "2024, May 05"
}
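
As context for the abstract's contrast between plain softmax cross entropy and
margin-enforced variants, below is a minimal sketch of a generic
additive-margin softmax cross entropy in Python. It is an illustrative
assumption only: the margin value m and this particular formulation are not
taken from the paper, and this is not the authors' 1-to-N distance loss, whose
exact definition is given in the paper itself.

    import numpy as np

    def margin_softmax_cross_entropy(logits, labels, m=0.35):
        """Cross entropy with an additive margin: the true-class logit is
        reduced by m, so classes must be separated by at least m before the
        loss becomes small. The value of m is an illustrative choice, not
        a parameter from the paper.

        logits: (batch, classes) raw scores; labels: (batch,) class indices.
        """
        z = logits.copy()
        rows = np.arange(len(labels))
        z[rows, labels] -= m                      # enforce the margin on the true class
        z -= z.max(axis=1, keepdims=True)         # stabilize the softmax numerically
        log_probs = z - np.log(np.exp(z).sum(axis=1, keepdims=True))
        return -log_probs[rows, labels].mean()

    # Example: two samples, three classes.
    logits = np.array([[2.0, 0.5, -1.0], [0.1, 1.5, 0.3]])
    labels = np.array([0, 1])
    print(margin_softmax_cross_entropy(logits, labels))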

